#define BVT_INFO(p) ((struct bvt_dom_info *)(p)->sched_priv)
#define EBVT_INFO(p) ((struct bvt_vcpu_info *)(p)->sched_priv)
-#define CPU_INFO(cpu) ((struct bvt_cpu_info *)(schedule_data[cpu]).sched_priv)
+#define CPU_INFO(cpu) \
+ ((struct bvt_cpu_info *)(per_cpu(schedule_data, cpu).sched_priv))
#define RUNLIST(p) ((struct list_head *)&(EBVT_INFO(p)->run_list))
#define RUNQUEUE(cpu) ((struct list_head *)&(CPU_INFO(cpu)->runqueue))
#define CPU_SVT(cpu) (CPU_INFO(cpu)->svt)
/* Allocate per-CPU context if this is the first domain to be added. */
if ( CPU_INFO(v->processor) == NULL )
{
- schedule_data[v->processor].sched_priv = xmalloc(struct bvt_cpu_info);
+ per_cpu(schedule_data, v->processor).sched_priv =
+ xmalloc(struct bvt_cpu_info);
BUG_ON(CPU_INFO(v->processor) == NULL);
INIT_LIST_HEAD(RUNQUEUE(v->processor));
CPU_SVT(v->processor) = 0;
/* Deal with warping here. */
einf->evt = calc_evt(v, einf->avt);
- curr = schedule_data[cpu].curr;
+ curr = per_cpu(schedule_data, cpu).curr;
curr_evt = calc_evt(curr, calc_avt(curr, now));
/* Calculate the time the current domain would run assuming
the second smallest evt is of the newly woken domain */
if ( is_idle_vcpu(curr) || (einf->evt <= curr_evt) )
cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
- else if ( schedule_data[cpu].s_timer.expires > r_time )
- set_timer(&schedule_data[cpu].s_timer, r_time);
+ else if ( per_cpu(schedule_data, cpu).s_timer.expires > r_time )
+ set_timer(&per_cpu(schedule_data, cpu).s_timer, r_time);
}
static void bvt_sleep(struct vcpu *v)
{
- if ( schedule_data[v->processor].curr == v )
+ if ( per_cpu(schedule_data, v->processor).curr == v )
cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);
else if ( __task_on_runqueue(v) )
__del_from_runqueue(v);
* *and* the task the second lowest evt.
* this code is O(n) but we expect n to be small.
*/
- next_einf = EBVT_INFO(schedule_data[cpu].idle);
+ next_einf = EBVT_INFO(per_cpu(schedule_data, cpu).idle);
next_prime_einf = NULL;
next_evt = ~0U;
/*
* Useful macros
*/
-#define CSCHED_PCPU(_c) ((struct csched_pcpu *)schedule_data[_c].sched_priv)
+#define CSCHED_PCPU(_c) \
+ ((struct csched_pcpu *)per_cpu(schedule_data, _c).sched_priv)
#define CSCHED_VCPU(_vcpu) ((struct csched_vcpu *) (_vcpu)->sched_priv)
#define CSCHED_DOM(_dom) ((struct csched_dom *) (_dom)->sched_priv)
#define RUNQ(_cpu) (&(CSCHED_PCPU(_cpu)->runq))
static inline void
__runq_tickle(unsigned int cpu, struct csched_vcpu *new)
{
- struct csched_vcpu * const cur = CSCHED_VCPU(schedule_data[cpu].curr);
+ struct csched_vcpu * const cur =
+ CSCHED_VCPU(per_cpu(schedule_data, cpu).curr);
cpumask_t mask;
ASSERT(cur);
INIT_LIST_HEAD(&spc->runq);
spc->runq_sort_last = csched_priv.runq_sort;
- schedule_data[cpu].sched_priv = spc;
+ per_cpu(schedule_data, cpu).sched_priv = spc;
/* Start off idling... */
- BUG_ON( !is_idle_vcpu(schedule_data[cpu].curr) );
+ BUG_ON( !is_idle_vcpu(per_cpu(schedule_data, cpu).curr) );
cpu_set(cpu, csched_priv.idlers);
spin_unlock_irqrestore(&csched_priv.lock, flags);
BUG_ON( is_idle_vcpu(vc) );
- if ( schedule_data[vc->processor].curr == vc )
+ if ( per_cpu(schedule_data, vc->processor).curr == vc )
cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
else if ( __vcpu_on_runq(svc) )
__runq_remove(svc);
BUG_ON( is_idle_vcpu(vc) );
- if ( unlikely(schedule_data[cpu].curr == vc) )
+ if ( unlikely(per_cpu(schedule_data, cpu).curr == vc) )
{
CSCHED_STAT_CRANK(vcpu_wake_running);
return;
vc->processor = first_cpu(vc->cpu_affinity);
- spin_unlock_irqrestore(&schedule_data[lcpu].schedule_lock, flags);
+ spin_unlock_irqrestore(&per_cpu(schedule_data, lcpu).schedule_lock,
+ flags);
}
vcpu_unpause(vc);
spc->runq_sort_last = sort_epoch;
- spin_lock_irqsave(&schedule_data[cpu].schedule_lock, flags);
+ spin_lock_irqsave(&per_cpu(schedule_data, cpu).schedule_lock, flags);
runq = &spc->runq;
elem = runq->next;
elem = next;
}
- spin_unlock_irqrestore(&schedule_data[cpu].schedule_lock, flags);
+ spin_unlock_irqrestore(&per_cpu(schedule_data, cpu).schedule_lock, flags);
}
static void
* we could distribute or at the very least cycle the duty.
*/
if ( (csched_priv.master == cpu) &&
- (schedule_data[cpu].tick % CSCHED_ACCT_NTICKS) == 0 )
+ (per_cpu(schedule_data, cpu).tick % CSCHED_ACCT_NTICKS) == 0 )
{
csched_acct();
}
* cause a deadlock if the peer CPU is also load balancing and trying
* to lock this CPU.
*/
- if ( spin_trylock(&schedule_data[peer_cpu].schedule_lock) )
+ if ( spin_trylock(&per_cpu(schedule_data, peer_cpu).schedule_lock) )
{
spc = CSCHED_PCPU(peer_cpu);
speer = csched_runq_steal(spc, cpu, snext->pri);
}
- spin_unlock(&schedule_data[peer_cpu].schedule_lock);
+ spin_unlock(&per_cpu(schedule_data, peer_cpu).schedule_lock);
/* Got one! */
if ( speer )
runq = &spc->runq;
printk(" tick=%lu, sort=%d\n",
- schedule_data[cpu].tick,
+ per_cpu(schedule_data, cpu).tick,
spc->runq_sort_last);
/* current VCPU */
- svc = CSCHED_VCPU(schedule_data[cpu].curr);
+ svc = CSCHED_VCPU(per_cpu(schedule_data, cpu).curr);
if ( svc )
{
printk("\trun: ");
};
#define EDOM_INFO(d) ((struct sedf_vcpu_info *)((d)->sched_priv))
-#define CPU_INFO(cpu) ((struct sedf_cpu_info *)schedule_data[cpu].sched_priv)
+#define CPU_INFO(cpu) \
+ ((struct sedf_cpu_info *)per_cpu(schedule_data, cpu).sched_priv)
#define LIST(d) (&EDOM_INFO(d)->list)
#define EXTRALIST(d,i) (&(EDOM_INFO(d)->extralist[i]))
#define RUNQ(cpu) (&CPU_INFO(cpu)->runnableq)
#define WAITQ(cpu) (&CPU_INFO(cpu)->waitq)
#define EXTRAQ(cpu,i) (&(CPU_INFO(cpu)->extraq[i]))
-#define IDLETASK(cpu) ((struct vcpu *)schedule_data[cpu].idle)
+#define IDLETASK(cpu) ((struct vcpu *)per_cpu(schedule_data, cpu).idle)
#define PERIOD_BEGIN(inf) ((inf)->deadl_abs - (inf)->period)
inf->vcpu = v;
/* Allocate per-CPU context if this is the first domain to be added. */
- if ( unlikely(schedule_data[v->processor].sched_priv == NULL) )
+ if ( unlikely(per_cpu(schedule_data, v->processor).sched_priv == NULL) )
{
- schedule_data[v->processor].sched_priv =
+ per_cpu(schedule_data, v->processor).sched_priv =
xmalloc(struct sedf_cpu_info);
- BUG_ON(schedule_data[v->processor].sched_priv == NULL);
+ BUG_ON(per_cpu(schedule_data, v->processor).sched_priv == NULL);
memset(CPU_INFO(v->processor), 0, sizeof(*CPU_INFO(v->processor)));
INIT_LIST_HEAD(WAITQ(v->processor));
INIT_LIST_HEAD(RUNQ(v->processor));
EDOM_INFO(d)->status |= SEDF_ASLEEP;
- if ( schedule_data[d->processor].curr == d )
+ if ( per_cpu(schedule_data, d->processor).curr == d )
{
cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
}
Save approximation: Always switch to scheduler!*/
ASSERT(d->processor >= 0);
ASSERT(d->processor < NR_CPUS);
- ASSERT(schedule_data[d->processor].curr);
+ ASSERT(per_cpu(schedule_data, d->processor).curr);
- if ( should_switch(schedule_data[d->processor].curr, d, now) )
+ if ( should_switch(per_cpu(schedule_data, d->processor).curr, d, now) )
cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
}
static void poll_timer_fn(void *data);
/* This is global for now so that private implementations can reach it */
-struct schedule_data schedule_data[NR_CPUS];
+DEFINE_PER_CPU(struct schedule_data, schedule_data);
extern struct scheduler sched_bvt_def;
extern struct scheduler sched_sedf_def;
struct vcpu *v, int new_state, s_time_t new_entry_time)
{
ASSERT(v->runstate.state != new_state);
- ASSERT(spin_is_locked(&schedule_data[v->processor].schedule_lock));
+    ASSERT(spin_is_locked(
+               &per_cpu(schedule_data, v->processor).schedule_lock));
v->runstate.time[v->runstate.state] +=
new_entry_time - v->runstate.state_entry_time;
if ( is_idle_vcpu(v) )
{
- schedule_data[v->processor].curr = v;
- schedule_data[v->processor].idle = v;
+ per_cpu(schedule_data, v->processor).curr = v;
+ per_cpu(schedule_data, v->processor).idle = v;
set_bit(_VCPUF_running, &v->vcpu_flags);
}
*/
static void __enter_scheduler(void)
{
- struct vcpu *prev = current, *next = NULL;
- int cpu = smp_processor_id();
- s_time_t now = NOW();
- struct task_slice next_slice;
- s32 r_time; /* time for new dom to run */
+ struct vcpu *prev = current, *next = NULL;
+ s_time_t now = NOW();
+ struct schedule_data *sd;
+ struct task_slice next_slice;
+ s32 r_time; /* time for new dom to run */
ASSERT(!in_irq());
perfc_incrc(sched_run);
- spin_lock_irq(&schedule_data[cpu].schedule_lock);
+ sd = &this_cpu(schedule_data);
- stop_timer(&schedule_data[cpu].s_timer);
+ spin_lock_irq(&sd->schedule_lock);
+
+ stop_timer(&sd->s_timer);
/* get policy-specific decision on scheduling... */
next_slice = ops.do_schedule(now);
r_time = next_slice.time;
next = next_slice.task;
- schedule_data[cpu].curr = next;
+ sd->curr = next;
- set_timer(&schedule_data[cpu].s_timer, now + r_time);
+ set_timer(&sd->s_timer, now + r_time);
if ( unlikely(prev == next) )
{
- spin_unlock_irq(&schedule_data[cpu].schedule_lock);
+ spin_unlock_irq(&sd->schedule_lock);
return continue_running(prev);
}
ASSERT(!test_bit(_VCPUF_running, &next->vcpu_flags));
set_bit(_VCPUF_running, &next->vcpu_flags);
- spin_unlock_irq(&schedule_data[cpu].schedule_lock);
+ spin_unlock_irq(&sd->schedule_lock);
perfc_incrc(sched_ctx);
- prev->sleep_tick = schedule_data[cpu].tick;
+ prev->sleep_tick = sd->tick;
/* Ensure that the domain has an up-to-date time base. */
if ( !is_idle_vcpu(next) )
{
update_vcpu_system_time(next);
- if ( next->sleep_tick != schedule_data[cpu].tick )
+ if ( next->sleep_tick != sd->tick )
send_timer_event(next);
}
struct vcpu *v = current;
unsigned int cpu = smp_processor_id();
- schedule_data[cpu].tick++;
+ per_cpu(schedule_data, cpu).tick++;
if ( !is_idle_vcpu(v) )
{
for ( i = 0; i < NR_CPUS; i++ )
{
- spin_lock_init(&schedule_data[i].schedule_lock);
- init_timer(&schedule_data[i].s_timer, s_timer_fn, NULL, i);
+ spin_lock_init(&per_cpu(schedule_data, i).schedule_lock);
+ init_timer(&per_cpu(schedule_data, i).s_timer, s_timer_fn, NULL, i);
init_timer(&t_timer[i], t_timer_fn, NULL, i);
}
for_each_online_cpu ( i )
{
- spin_lock(&schedule_data[i].schedule_lock);
+ spin_lock(&per_cpu(schedule_data, i).schedule_lock);
printk("CPU[%02d] ", i);
- SCHED_OP(dump_cpu_state,i);
- spin_unlock(&schedule_data[i].schedule_lock);
+ SCHED_OP(dump_cpu_state, i);
+ spin_unlock(&per_cpu(schedule_data, i).schedule_lock);
}
local_irq_restore(flags);
#ifndef __XEN_SCHED_IF_H__
#define __XEN_SCHED_IF_H__
+#include <xen/percpu.h>
+
struct schedule_data {
spinlock_t schedule_lock; /* spinlock protecting curr */
struct vcpu *curr; /* current task */
unsigned long tick; /* current periodic 'tick' */
} __cacheline_aligned;
-extern struct schedule_data schedule_data[];
+DECLARE_PER_CPU(struct schedule_data, schedule_data);
static inline void vcpu_schedule_lock(struct vcpu *v)
{
for ( ; ; )
{
cpu = v->processor;
- spin_lock(&schedule_data[cpu].schedule_lock);
+ spin_lock(&per_cpu(schedule_data, cpu).schedule_lock);
if ( likely(v->processor == cpu) )
break;
- spin_unlock(&schedule_data[cpu].schedule_lock);
+ spin_unlock(&per_cpu(schedule_data, cpu).schedule_lock);
}
}
static inline void vcpu_schedule_unlock(struct vcpu *v)
{
- spin_unlock(&schedule_data[v->processor].schedule_lock);
+ spin_unlock(&per_cpu(schedule_data, v->processor).schedule_lock);
}
#define vcpu_schedule_unlock_irq(v) \